
@InProceedings{SouzaLBGRALF:2019:BrExNe,
               author = "Souza, Roberto and Lucena, Oeslle and Bento, Mariana and Garrafa, 
                         Julia and Rittner, Let{\'{\i}}cia and Appenzeller, Simone and 
                         Lotufo, Roberto and Frayne, Richard",
          affiliation = "{University of Calgary} and {King's College London} and 
                         {University of Calgary} and {University of Campinas} and 
                         {University of Campinas} and {University of Campinas} and 
                         {University of Campinas} and {University of Calgary}",
                title = "Brain extraction network trained with ``silver standard'' data and 
                         fine-tuned with manual annotation for improved segmentation",
            booktitle = "Proceedings...",
                 year = "2019",
               editor = "Oliveira, Luciano Rebou{\c{c}}as de and Sarder, Pinaki and Lage, 
                         Marcos and Sadlo, Filip",
         organization = "Conference on Graphics, Patterns and Images, 32. (SIBGRAPI)",
            publisher = "IEEE Computer Society",
              address = "Los Alamitos",
             keywords = "skull-stripping, brain extraction, MRI, segmentation.",
             abstract = "Training convolutional neural networks (CNNs) for medical image 
                         segmentation often requires large and representative sets of 
                         images and their corresponding annotations. Obtaining annotated 
                         images usually requires manual intervention, which is expensive 
                         and time consuming, as it typically requires a specialist. An 
                         alternative approach is to leverage existing automatic 
                         segmentation tools and combine them to create consensus-based 
                         silver-standards annotations. A drawback to this approach is that 
                         silver-standards are usually smooth and this smoothness is 
                         transmitted to the output segmentation of the network. Our 
                         proposal is to use a two-staged approach. First, silver-standard 
                         datasets are used to generate a large set of annotated images in 
                         order to train the brain extraction network from scratch. Second, 
                         fine-tuning is performed using much smaller amounts of manually 
                         annotated data so that the network can learn the finer details 
                         that are not preserved in the silver-standard data. As an example, 
                         our two-staged brain extraction approach has been shown to 
                         outperform seven state-of-the-art techniques across three 
                         different public datasets. Our results also suggest that CNNs can 
                         potentially capture inter-rater annotation variability between 
                         experts who annotate the same set of images following the same 
                         guidelines, and also adapt to different annotation guidelines.",
  conference-location = "Rio de Janeiro, RJ, Brazil",
      conference-year = "28-31 Oct. 2019",
                  doi = "10.1109/SIBGRAPI.2019.00039",
             language = "en",
                  ibi = "8JMKD3MGPEW34M/3U2N8NH",
                  url = "http://urlib.net/ibi/8JMKD3MGPEW34M/3U2N8NH",
           targetfile = "SIBGRAPI_Skull_stripping_Fine_tuning.pdf",
        urlaccessdate = "2024, Apr. 28"
}

